panic_domain(NULL, "Unknown IPI cpu\n");
if (!targ->is_initialised ||
- test_bit(_VCPUF_down, &targ->vcpu_flags)) {
+ test_bit(_VPF_down, &targ->pause_flags)) {
struct pt_regs *targ_regs = vcpu_regs(targ);
struct vcpu_guest_context c;
targ_regs->cr_iip = d->arch.sal_data->boot_rdv_ip;
targ_regs->r1 = d->arch.sal_data->boot_rdv_r1;
- if (test_and_clear_bit(_VCPUF_down,&targ->vcpu_flags)) {
+ if (test_and_clear_bit(_VPF_down,&targ->pause_flags)) {
vcpu_wake(targ);
printk(XENLOG_DEBUG "arch_boot_vcpu: vcpu %d awaken %016lx!\n",
targ->vcpu_id, targ_regs->cr_iip);
p->state != STATE_IOREQ_INPROCESS)
break;
- set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+ set_bit(_VPF_blocked_in_xen, &current->pause_flags);
mb(); /* set blocked status /then/ re-evaluate condition */
if (p->state != STATE_IOREQ_READY &&
p->state != STATE_IOREQ_INPROCESS)
{
- clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags);
+ clear_bit(_VPF_blocked_in_xen, &current->pause_flags);
break;
}
v->is_initialised = 1;
/* Auto-online VCPU0 when it is initialised. */
if (v->vcpu_id == 0)
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VPF_down, &v->pause_flags);
}
return 0;
printk("Dom0: 0x%lx\n", (u64)dom0);
v->is_initialised = 1;
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VPF_down, &v->pause_flags);
/* Build firmware.
Note: Linux kernel reserve memory used by start_info, so there is
printk("*** CALLED SAL_UPDATE_PAL. IGNORED...\n");
break;
case SAL_XEN_SAL_RETURN:
- if (!test_and_set_bit(_VCPUF_down, &current->vcpu_flags))
+ if (!test_and_set_bit(_VPF_down, &current->pause_flags))
vcpu_sleep_nosync(current);
break;
case SN_SAL_GET_MASTER_NASID:
console_start_sync();
(*efi.reset_system)(EFI_RESET_SHUTDOWN,0,0,NULL);
} else {
- set_bit(_VCPUF_down, &current->vcpu_flags);
+ set_bit(_VPF_down, &current->pause_flags);
vcpu_sleep_nosync(current);
status = PAL_STATUS_SUCCESS;
}
if (vector == XEN_SAL_BOOT_RENDEZ_VEC
&& (!targ->is_initialised
- || test_bit(_VCPUF_down, &targ->vcpu_flags))) {
+ || test_bit(_VPF_down, &targ->pause_flags))) {
/* First start: initialize vpcu. */
if (!targ->is_initialised) {
vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;
- if (test_and_clear_bit(_VCPUF_down,
- &targ->vcpu_flags)) {
+ if (test_and_clear_bit(_VPF_down,
+ &targ->pause_flags)) {
vcpu_wake(targ);
printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awaken\n",
targ->vcpu_id);
regs->r10 = x.r10; regs->r11 = x.r11;
break;
case FW_HYPERCALL_SAL_RETURN:
- if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
vcpu_sleep_nosync(v);
break;
case FW_HYPERCALL_EFI_CALL:
v->is_initialised = 1;
/* Auto-online VCPU0 when it is initialised. */
if ( v->vcpu_id == 0 )
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VPF_down, &v->pause_flags);
}
cpu_init_vcpu(v);
ofd_dom0_fixup(d, *ofh_tree + rma, cmdline, shared_info_addr);
v->is_initialised = 1;
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VPF_down, &v->pause_flags);
rc = 0;
out:
if ( flags & VGCF_online )
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VPF_down, &v->pause_flags);
else
- set_bit(_VCPUF_down, &v->vcpu_flags);
+ set_bit(_VPF_down, &v->pause_flags);
return 0;
#undef c
}
update_domain_wallclock_time(d);
v->is_initialised = 1;
- clear_bit(_VCPUF_down, &v->vcpu_flags);
+ clear_bit(_VPF_down, &v->pause_flags);
/*
* Initial register values:
c(flags &= ~(VGCF_i387_valid|VGCF_in_kernel));
if ( v->fpu_initialised )
c(flags |= VGCF_i387_valid);
- if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( !test_bit(_VPF_down, &v->pause_flags) )
c(flags |= VGCF_online);
if ( is_hvm_vcpu(v) )
{
/* We don't need to save state for a vcpu that is down; the restore
* code will leave it down if there is nothing saved. */
- if ( test_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( test_bit(_VPF_down, &v->pause_flags) )
continue;
hvm_funcs.save_cpu_ctxt(v, &ctxt);
return -EINVAL;
/* Auxiliary processors should be woken immediately. */
- if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
vcpu_wake(v);
return 0;
hvm_funcs.vcpu_initialise(v);
- set_bit(_VCPUF_down, &v->vcpu_flags);
- clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ set_bit(_VPF_down, &v->pause_flags);
+ clear_bit(_VPF_blocked, &v->pause_flags);
v->fpu_initialised = 0;
v->fpu_dirtied = 0;
v->is_initialised = 0;
d->domain_id, v->vcpu_id);
/* Doesn't halt us immediately, but we'll never return to guest context. */
- set_bit(_VCPUF_down, &v->vcpu_flags);
+ set_bit(_VPF_down, &v->pause_flags);
vcpu_sleep_nosync(v);
/* Any other VCPUs online? ... */
LOCK_BIGLOCK(d);
for_each_vcpu ( d, v )
- if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( !test_bit(_VPF_down, &v->pause_flags) )
online_count++;
UNLOCK_BIGLOCK(d);
goto out;
}
- if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
vcpu_wake(v);
gdprintk(XENLOG_INFO, "AP %d bringup suceeded.\n", vcpuid);
/* Down all the vcpus: we only re-enable the ones that had state saved. */
for_each_vcpu(d, v)
- if ( test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( test_and_set_bit(_VPF_down, &v->pause_flags) )
vcpu_sleep_nosync(v);
while(1) {
struct list_head *list;
struct periodic_time *pt;
- if ( test_bit(_VCPUF_blocked, &v->vcpu_flags) )
+ if ( test_bit(_VPF_blocked, &v->pause_flags) )
return;
v->arch.hvm_vcpu.guest_time = hvm_get_guest_time(v);
arch.guest_context.kernel_sp);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
- OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
OFFSET(VCPU_arch_guest_fpu_ctxt, struct vcpu, arch.guest_context.fpu_ctxt);
- OFFSET(VCPU_flags, struct vcpu, vcpu_flags);
OFFSET(VCPU_nmi_addr, struct vcpu, nmi_addr);
OFFSET(VCPU_nmi_pending, struct vcpu, nmi_pending);
OFFSET(VCPU_nmi_masked, struct vcpu, nmi_masked);
if ( !is_idle_domain(d) )
{
- set_bit(_VCPUF_down, &v->vcpu_flags);
+ set_bit(_VPF_down, &v->pause_flags);
v->vcpu_info = shared_info_addr(d, vcpu_info[vcpu_id]);
}
if ( rc != 0 )
goto out;
- set_bit(_VCPUF_down, &v->vcpu_flags);
+ set_bit(_VPF_down, &v->pause_flags);
v->fpu_initialised = 0;
v->fpu_dirtied = 0;
v->is_initialised = 0;
v->nmi_pending = 0;
v->nmi_masked = 0;
- clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ clear_bit(_VPF_blocked, &v->pause_flags);
out:
UNLOCK_BIGLOCK(v->domain);
if ( !v->is_initialised )
return -EINVAL;
- if ( test_and_clear_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
vcpu_wake(v);
break;
case VCPUOP_down:
- if ( !test_and_set_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
vcpu_sleep_nosync(v);
break;
case VCPUOP_is_up:
- rc = !test_bit(_VCPUF_down, &v->vcpu_flags);
+ rc = !test_bit(_VPF_down, &v->pause_flags);
break;
case VCPUOP_get_runstate_info:
vcpu_runstate_get(v, &runstate);
cpu_time += runstate.time[RUNSTATE_running];
info->max_vcpu_id = v->vcpu_id;
- if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( !test_bit(_VPF_down, &v->pause_flags) )
{
- if ( !(v->vcpu_flags & VCPUF_blocked) )
+ if ( !(v->pause_flags & VPF_blocked) )
flags &= ~XEN_DOMINF_blocked;
if ( v->is_running )
flags |= XEN_DOMINF_running;
rcu_read_lock(&domlist_read_lock);
for_each_domain ( d )
for_each_vcpu ( d, v )
- if ( !test_bit(_VCPUF_down, &v->vcpu_flags) )
+ if ( !test_bit(_VPF_down, &v->pause_flags) )
cnt[v->processor]++;
rcu_read_unlock(&domlist_read_lock);
vcpu_runstate_get(v, &runstate);
- op->u.getvcpuinfo.online = !test_bit(_VCPUF_down, &v->vcpu_flags);
- op->u.getvcpuinfo.blocked = test_bit(_VCPUF_blocked, &v->vcpu_flags);
+ op->u.getvcpuinfo.online = !test_bit(_VPF_down, &v->pause_flags);
+ op->u.getvcpuinfo.blocked = test_bit(_VPF_blocked, &v->pause_flags);
op->u.getvcpuinfo.running = v->is_running;
op->u.getvcpuinfo.cpu_time = runstate.time[RUNSTATE_running];
op->u.getvcpuinfo.cpu = v->processor;
if ( rchn->consumer_is_xen )
{
/* Xen consumers need notification only if they are blocked. */
- if ( test_and_clear_bit(_VCPUF_blocked_in_xen,
- &rvcpu->vcpu_flags) )
+ if ( test_and_clear_bit(_VPF_blocked_in_xen,
+ &rvcpu->pause_flags) )
vcpu_wake(rvcpu);
}
else
"upcall_pend = %02x, upcall_mask = %02x ",
v->vcpu_id, v->processor,
v->is_running ? 'T':'F',
- v->vcpu_flags,
+ v->pause_flags,
vcpu_info(v, evtchn_upcall_pending),
vcpu_info(v, evtchn_upcall_mask));
cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
{
CSCHED_VCPU_STAT_CRANK(svc, migrate_r);
CSCHED_STAT_CRANK(migrate_running);
- set_bit(_VCPUF_migrating, &current->vcpu_flags);
+ set_bit(_VPF_migrating, &current->pause_flags);
cpu_raise_softirq(cpu, SCHEDULE_SOFTIRQ);
}
}
vcpu_runstate_change(v, RUNSTATE_runnable, NOW());
SCHED_OP(wake, v);
}
- else if ( !test_bit(_VCPUF_blocked, &v->vcpu_flags) )
+ else if ( !test_bit(_VPF_blocked, &v->pause_flags) )
{
if ( v->runstate.state == RUNSTATE_blocked )
vcpu_runstate_change(v, RUNSTATE_offline, NOW());
* regions are strictly serialised.
*/
if ( v->is_running ||
- !test_and_clear_bit(_VCPUF_migrating, &v->vcpu_flags) )
+ !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
{
vcpu_schedule_unlock_irqrestore(v, flags);
return;
{
vcpu_schedule_lock_irq(v);
if ( v->is_running )
- set_bit(_VCPUF_migrating, &v->vcpu_flags);
+ set_bit(_VPF_migrating, &v->pause_flags);
vcpu_schedule_unlock_irq(v);
- if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
+ if ( test_bit(_VPF_migrating, &v->pause_flags) )
{
vcpu_sleep_nosync(v);
vcpu_migrate(v);
v->cpu_affinity = *affinity;
if ( !cpu_isset(v->processor, v->cpu_affinity) )
- set_bit(_VCPUF_migrating, &v->vcpu_flags);
+ set_bit(_VPF_migrating, &v->pause_flags);
vcpu_schedule_unlock_irq(v);
- if ( test_bit(_VCPUF_migrating, &v->vcpu_flags) )
+ if ( test_bit(_VPF_migrating, &v->pause_flags) )
{
vcpu_sleep_nosync(v);
vcpu_migrate(v);
struct vcpu *v = current;
local_event_delivery_enable();
- set_bit(_VCPUF_blocked, &v->vcpu_flags);
+ set_bit(_VPF_blocked, &v->pause_flags);
/* Check for events /after/ blocking: avoids wakeup waiting race. */
if ( local_events_need_delivery() )
{
- clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ clear_bit(_VPF_blocked, &v->pause_flags);
}
else
{
if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
return -EFAULT;
- set_bit(_VCPUF_blocked, &v->vcpu_flags);
+ set_bit(_VPF_blocked, &v->pause_flags);
v->is_polling = 1;
d->is_polling = 1;
out:
v->is_polling = 0;
- clear_bit(_VCPUF_blocked, &v->vcpu_flags);
+ clear_bit(_VPF_blocked, &v->pause_flags);
return rc;
}
ASSERT(prev->runstate.state == RUNSTATE_running);
vcpu_runstate_change(
prev,
- (test_bit(_VCPUF_blocked, &prev->vcpu_flags) ? RUNSTATE_blocked :
+ (test_bit(_VPF_blocked, &prev->pause_flags) ? RUNSTATE_blocked :
(vcpu_runnable(prev) ? RUNSTATE_runnable : RUNSTATE_offline)),
now);
/* Check for migration request /after/ clearing running flag. */
smp_mb();
- if ( unlikely(test_bit(_VCPUF_migrating, &prev->vcpu_flags)) )
+ if ( unlikely(test_bit(_VPF_migrating, &prev->pause_flags)) )
vcpu_migrate(prev);
}
static inline void vcpu_kick(struct vcpu *v)
{
/*
- * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+ * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
* pending flag. These values may fluctuate (after all, we hold no
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
static inline void vcpu_kick(struct vcpu *v)
{
/*
- * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+ * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
* pending flag. These values may fluctuate (after all, we hold no
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
static inline void vcpu_kick(struct vcpu *v)
{
/*
- * NB1. 'vcpu_flags' and 'processor' must be checked /after/ update of
+ * NB1. 'pause_flags' and 'processor' must be checked /after/ update of
* pending flag. These values may fluctuate (after all, we hold no
* locks) but the key insight is that each change will cause
* evtchn_upcall_pending to be polled.
do { \
if ( condition ) \
break; \
- set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags); \
+ set_bit(_VPF_blocked_in_xen, &current->pause_flags); \
mb(); /* set blocked status /then/ re-evaluate condition */ \
if ( condition ) \
{ \
- clear_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags); \
+ clear_bit(_VPF_blocked_in_xen, &current->pause_flags); \
break; \
} \
raise_softirq(SCHEDULE_SOFTIRQ); \
#define prepare_wait_on_xen_event_channel(port) \
do { \
- set_bit(_VCPUF_blocked_in_xen, &current->vcpu_flags); \
+ set_bit(_VPF_blocked_in_xen, &current->pause_flags); \
raise_softirq(SCHEDULE_SOFTIRQ); \
mb(); /* set blocked status /then/ caller does his work */ \
} while ( 0 )
/* Avoid NMI reentry by allowing NMIs to be masked for short periods. */
bool_t nmi_masked;
- unsigned long vcpu_flags;
-
+ unsigned long pause_flags;
atomic_t pause_count;
u16 virq_to_evtchn[NR_VIRQS];
(_v) = (_v)->next_in_list )
/*
- * Per-VCPU flags (vcpu_flags).
+ * Per-VCPU pause flags.
*/
/* Domain is blocked waiting for an event. */
-#define _VCPUF_blocked 0
-#define VCPUF_blocked (1UL<<_VCPUF_blocked)
+#define _VPF_blocked 0
+#define VPF_blocked (1UL<<_VPF_blocked)
/* VCPU is offline. */
-#define _VCPUF_down 1
-#define VCPUF_down (1UL<<_VCPUF_down)
+#define _VPF_down 1
+#define VPF_down (1UL<<_VPF_down)
/* VCPU is blocked awaiting an event to be consumed by Xen. */
-#define _VCPUF_blocked_in_xen 2
-#define VCPUF_blocked_in_xen (1UL<<_VCPUF_blocked_in_xen)
+#define _VPF_blocked_in_xen 2
+#define VPF_blocked_in_xen (1UL<<_VPF_blocked_in_xen)
/* VCPU affinity has changed: migrating to a new CPU. */
-#define _VCPUF_migrating 3
-#define VCPUF_migrating (1UL<<_VCPUF_migrating)
+#define _VPF_migrating 3
+#define VPF_migrating (1UL<<_VPF_migrating)
static inline int vcpu_runnable(struct vcpu *v)
{
- return (!v->vcpu_flags &&
- !atomic_read(&v->pause_count) &&
- !atomic_read(&v->domain->pause_count));
+ return !(v->pause_flags |
+ atomic_read(&v->pause_count) |
+ atomic_read(&v->domain->pause_count));
}
void vcpu_pause(struct vcpu *v);
static inline void vcpu_unblock(struct vcpu *v)
{
- if ( test_and_clear_bit(_VCPUF_blocked, &v->vcpu_flags) )
+ if ( test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
vcpu_wake(v);
}